local_flush_tlb();
}
+/*
+ * try_flush_tlb_mask: best-effort multi-CPU TLB flush.
+ * Per the warning in flush_tlb_mask(), this is the only variant that is
+ * safe to call from IRQ context: it uses spin_trylock() rather than
+ * spinning on tlbstate_lock, avoiding deadlock against an interrupted
+ * holder of the lock.
+ * Returns 1 if all CPUs in 'mask' were flushed; 0 if the lock was
+ * contended (remote TLBs NOT flushed -- caller must handle failure).
+ */
+int try_flush_tlb_mask(unsigned long mask)
+{
+ /* Flush the local TLB directly and strip ourselves from the mask. */
+ if ( mask & (1 << smp_processor_id()) )
+ {
+ local_flush_tlb();
+ mask &= ~(1 << smp_processor_id());
+ }
+
+ if ( mask != 0 )
+ {
+ /* Must not spin for the lock here (IRQ context) -- bail instead. */
+ if ( unlikely(!spin_trylock(&tlbstate_lock)) )
+ return 0;
+ flush_cpumask = mask;
+ send_IPI_mask(mask, INVALIDATE_TLB_VECTOR);
+ /*
+ * Wait until flush_cpumask drains to zero -- presumably each remote
+ * CPU's INVALIDATE_TLB_VECTOR handler clears its own bit (handler
+ * not visible here; confirm).
+ */
+ while ( flush_cpumask != 0 )
+ {
+ rep_nop();
+ barrier();
+ }
+ spin_unlock(&tlbstate_lock);
+ }
+
+ return 1;
+}
+
void flush_tlb_mask(unsigned long mask)
{
+ /* WARNING: Only try_flush_tlb_mask() is safe in IRQ context. */
if ( unlikely(in_irq()) )
BUG();
#include <xeno/blkdev.h>
#include <xeno/console.h>
#include <xeno/vbd.h>
-
#include <asm/i387.h>
/*
if ( unlikely(page == NULL) )
return NULL;
- if ( unlikely((mask = page->u.cpu_mask) != 0) )
+ if ( (mask = page->u.cpu_mask) != 0 )
{
pfn_stamp = page->tlbflush_timestamp;
for ( i = 0; (mask != 0) && (i < NR_CPUS); i++ )
{
- if ( unlikely(mask & (1<<i)) )
+ if ( mask & (1<<i) )
{
cpu_stamp = tlbflush_time[i];
if ( !NEED_FLUSH(cpu_stamp, pfn_stamp) )
if ( unlikely(mask != 0) )
{
- if ( unlikely(in_irq()) )
- {
- DPRINTK("Returning NULL from alloc_domain_page: in_irq\n");
+ /* In IRQ ctxt, flushing is best-effort only, to avoid deadlock. */
+ if ( likely(!in_irq()) )
+ flush_tlb_mask(mask);
+ else if ( unlikely(!try_flush_tlb_mask(mask)) )
goto free_and_exit;
- }
perfc_incrc(need_flush_tlb_flush);
- flush_tlb_mask(mask);
}
}
* used for a purpose that may have caused the CPU's TLB to become tainted.
*/
#define NEED_FLUSH(_cpu_stamp, _lastuse_stamp) \
- (((_cpu_stamp) > (_lastuse_stamp)) || \
- (((_lastuse_stamp) - (_cpu_stamp)) > (2*GLOBAL_FLUSH_PERIOD)))
+ (((_cpu_stamp) <= (_lastuse_stamp)) && \
+ (((_lastuse_stamp) - (_cpu_stamp)) <= (2*GLOBAL_FLUSH_PERIOD)))
extern unsigned long tlbflush_mask;
extern unsigned long tlbflush_clock;
#ifndef CONFIG_SMP
-#define flush_tlb() __flush_tlb()
-#define flush_tlb_all() __flush_tlb()
-#define flush_tlb_all_pge() __flush_tlb_pge()
-#define local_flush_tlb() __flush_tlb()
-#define flush_tlb_cpu(_cpu) __flush_tlb()
-#define flush_tlb_mask(_mask) __flush_tlb()
+#define flush_tlb() __flush_tlb()
+#define flush_tlb_all() __flush_tlb()
+#define flush_tlb_all_pge() __flush_tlb_pge()
+#define local_flush_tlb() __flush_tlb()
+#define flush_tlb_cpu(_cpu) __flush_tlb()
+#define flush_tlb_mask(_mask) __flush_tlb()
+#define try_flush_tlb_mask(_mask) __flush_tlb()
#else
#include <xeno/smp.h>
+extern int try_flush_tlb_mask(unsigned long mask);
extern void flush_tlb_mask(unsigned long mask);
extern void flush_tlb_all_pge(void);
(pte & ~PAGE_MASK) | _PAGE_RW | _PAGE_PRESENT |
((new_page - frame_table) << PAGE_SHIFT))) != pte )
{
+ DPRINTK("PTE was modified or reused! %08lx %08lx\n", pte, *ptep);
unmap_domain_mem(ptep);
/* At some point maybe should have 'new_page' in error response. */
put_page_and_type(new_page);
goto rx_unmap_and_continue;
}
+ buf_page->tlbflush_timestamp = tlbflush_clock;
+ buf_page->u.cpu_mask = 1 << p->processor;
+
/* Remove from the domain's allocation list. */
spin_lock(&p->page_list_lock);
list_del(&buf_page->list);
for ( i = vif->rx_req_cons;
(i != shared_idxs->rx_req_prod) &&
((i-vif->rx_resp_prod) != RX_RING_SIZE);
- i++ );
+ i++ )
{
make_rx_response(vif, shared_rings->rx_ring[MASK_NET_RX_IDX(i)].req.id,
0, RING_STATUS_DROPPED, 0);
/* Give the buffer page back to the domain. */
page = &frame_table[rx->buf_pfn];
+ page->u.domain = p;
spin_lock(&p->page_list_lock);
list_add(&page->list, &p->page_list);
page->count_and_flags = PGC_allocated | 2;
unlikely(cmpxchg(ptep, pte, (rx->buf_pfn<<PAGE_SHIFT) |
(pte & ~PAGE_MASK) | _PAGE_RW | _PAGE_PRESENT)
!= pte) )
+ {
+ DPRINTK("PTE was modified or reused! %08lx %08lx\n", pte, *ptep);
put_page_and_type(page);
+ }
unmap_domain_mem(ptep);
put_page_and_type(&frame_table[rx->pte_ptr >> PAGE_SHIFT]);
* {tx,rx}_skbs store outstanding skbuffs. The first entry in each
* array is an index into a chain of free entries.
*/
- struct sk_buff *tx_skbs[TX_RING_SIZE];
- struct sk_buff *rx_skbs[RX_RING_SIZE];
+ struct sk_buff *tx_skbs[TX_RING_SIZE+1];
+ struct sk_buff *rx_skbs[RX_RING_SIZE+1];
};
/* Access macros for acquiring freeing slots in {tx,rx}_skbs[]. */
memset(np->net_idx, 0, sizeof(*np->net_idx));
/* Initialise {tx,rx}_skbs to be a free chain containing every entry. */
- for ( i = 0; i < TX_RING_SIZE; i++ )
+ for ( i = 0; i <= TX_RING_SIZE; i++ )
np->tx_skbs[i] = (void *)(i+1);
- for ( i = 0; i < RX_RING_SIZE; i++ )
+ for ( i = 0; i <= RX_RING_SIZE; i++ )
np->rx_skbs[i] = (void *)(i+1);
wmb();